#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE _PAGE_TABLE
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_VSYSCALL \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD | _PAGE_USER )
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
- (__PAGE_KERNEL | _PAGE_PSE | _PAGE_USER )
+ (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC \
- (__PAGE_KERNEL_EXEC | _PAGE_PSE | _PAGE_USER )
-
+ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
/*
 * We don't support GLOBAL page in xenolinux64
 */

/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
   can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
}
#endif /* 4 level */
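
The header hunks above strip _PAGE_USER from the guest kernel's page protections; on x86_64 the PV guest kernel runs in ring 3, so the hypervisor takes over responsibility for setting that bit (via the adjust_guest_l*e macros added below). A minimal, self-contained sketch of that split in responsibility, using the standard x86 flag values (PRESENT=0x001, RW=0x002, USER=0x004, ACCESSED=0x020, DIRTY=0x040) purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Standard x86 PTE flag values (assumed here for illustration only). */
    #define _PAGE_PRESENT  0x001ULL
    #define _PAGE_RW       0x002ULL
    #define _PAGE_USER     0x004ULL
    #define _PAGE_ACCESSED 0x020ULL
    #define _PAGE_DIRTY    0x040ULL

    int main(void)
    {
        /* What the guest now asks for on an executable kernel mapping
         * (__PAGE_KERNEL_EXEC after this patch: no _PAGE_USER)... */
        uint64_t guest_flags = _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;

        /* ...and what lands in the real page table once the hypervisor
         * has ORed in _PAGE_USER for the present entry. */
        uint64_t installed_flags = guest_flags | _PAGE_USER;

        printf("guest view: 0x%03llx, installed: 0x%03llx\n",
               (unsigned long long)guest_flags,
               (unsigned long long)installed_flags);
        return 0;
    }
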
+#ifdef __x86_64__
+#define adjust_guest_l1e(pl1e) \
+ do { \
+ if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) ) \
+ l1e_add_flags((pl1e), _PAGE_USER); \
+ } while ( 0 )
+
+#define adjust_guest_l2e(pl2e) \
+ do { \
+ if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) ) \
+ l2e_add_flags((pl2e), _PAGE_USER); \
+ } while ( 0 )
+
+#define adjust_guest_l3e(pl3e) \
+ do { \
+ if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
+ l3e_add_flags((pl3e), _PAGE_USER); \
+ } while ( 0 )
+
+#define adjust_guest_l4e(pl4e) \
+ do { \
+ if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) ) \
+ l4e_add_flags((pl4e), _PAGE_USER); \
+ } while ( 0 )
+#else
+#define adjust_guest_l1e(_p) ((void)0)
+#define adjust_guest_l2e(_p) ((void)0)
+#define adjust_guest_l3e(_p) ((void)0)
+#endif
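
The adjust_guest_l*e macros only touch entries that are already present; non-present entries pass through unchanged, and on non-x86_64 builds they are no-ops. A standalone sketch of that behaviour follows; the l1_pgentry_t type and l1e_* helpers here are simplified stand-ins (the real macros operate on the entry lvalue directly), not Xen's actual definitions:

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_PRESENT 0x001ULL
    #define _PAGE_USER    0x004ULL

    /* Simplified stand-ins for Xen's l1_pgentry_t and l1e_* accessors. */
    typedef struct { uint64_t l1; } l1_pgentry_t;

    static uint64_t l1e_get_flags(l1_pgentry_t e)              { return e.l1 & 0xfffULL; }
    static void     l1e_add_flags(l1_pgentry_t *e, uint64_t f) { e->l1 |= f; }

    /* Same logic as adjust_guest_l1e() above, written as a function:
     * present entries gain _PAGE_USER, non-present entries are untouched. */
    static void adjust_guest_l1e_sketch(l1_pgentry_t *e)
    {
        if ( l1e_get_flags(*e) & _PAGE_PRESENT )
            l1e_add_flags(e, _PAGE_USER);
    }

    int main(void)
    {
        l1_pgentry_t present = { 0x1000 | _PAGE_PRESENT };
        l1_pgentry_t absent  = { 0 };

        adjust_guest_l1e_sketch(&present);
        adjust_guest_l1e_sketch(&absent);

        assert(l1e_get_flags(present) & _PAGE_USER);    /* user bit added */
        assert(!(l1e_get_flags(absent) & _PAGE_USER));  /* left alone */
        return 0;
    }
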
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
{
pl1e = map_domain_page(pfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+ {
if ( is_guest_l1_slot(i) &&
unlikely(!get_page_from_l1e(pl1e[i], d)) )
goto fail;
+ adjust_guest_l1e(pl1e[i]);
+ }
+
unmap_domain_page(pl1e);
return 1;
if ( is_guest_l2_slot(type, i) &&
unlikely(!get_page_from_l2e(pl2e[i], pfn, d, vaddr)) )
goto fail;
+
+ adjust_guest_l2e(pl2e[i]);
}
#if CONFIG_PAGING_LEVELS == 2
if ( is_guest_l3_slot(i) &&
unlikely(!get_page_from_l3e(pl3e[i], pfn, d, vaddr)) )
goto fail;
+
+ adjust_guest_l3e(pl3e[i]);
}
if ( !create_pae_xen_mappings(pl3e) )
if ( is_guest_l4_slot(i) &&
unlikely(!get_page_from_l4e(pl4e[i], pfn, d, vaddr)) )
goto fail;
+
+ adjust_guest_l4e(pl4e[i]);
}
/* Xen private mappings. */
return 0;
}
+ adjust_guest_l1e(nl1e);
+
/* Fast path for identical mapping, r/w and presence. */
if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
return update_l1e(pl1e, ol1e, nl1e, gl1mfn, current);
return 0;
}
+ adjust_guest_l2e(nl2e);
+
/* Fast path for identical mapping and presence. */
if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT))
return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn);
return 0;
}
+ adjust_guest_l3e(nl3e);
+
/* Fast path for identical mapping and presence. */
if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn);
return 0;
}
+ adjust_guest_l4e(nl4e);
+
/* Fast path for identical mapping and presence. */
if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn);
static int create_grant_pte_mapping(
- unsigned long pte_addr, l1_pgentry_t _nl1e, struct vcpu *v)
+ unsigned long pte_addr, l1_pgentry_t nl1e, struct vcpu *v)
{
int rc = GNTST_okay;
void *va;
ASSERT(spin_is_locked(&d->big_lock));
+ adjust_guest_l1e(nl1e);
+
gmfn = pte_addr >> PAGE_SHIFT;
mfn = gmfn_to_mfn(d, gmfn);
}
ol1e = *(l1_pgentry_t *)va;
- if ( !update_l1e(va, ol1e, _nl1e, mfn, v) )
+ if ( !update_l1e(va, ol1e, nl1e, mfn, v) )
{
put_page_type(page);
rc = GNTST_general_error;
static int create_grant_va_mapping(
- unsigned long va, l1_pgentry_t _nl1e, struct vcpu *v)
+ unsigned long va, l1_pgentry_t nl1e, struct vcpu *v)
{
l1_pgentry_t *pl1e, ol1e;
struct domain *d = v->domain;
ASSERT(spin_is_locked(&d->big_lock));
+ adjust_guest_l1e(nl1e);
+
pl1e = &linear_pg_table[l1_linear_offset(va)];
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ||
- !update_l1e(pl1e, ol1e, _nl1e,
+ !update_l1e(pl1e, ol1e, nl1e,
l2e_get_pfn(__linear_l2_table[l2_linear_offset(va)]), v) )
return GNTST_general_error;
}
}
+ adjust_guest_l1e(nl1e);
+
/* Checked successfully: do the update (write or cmpxchg). */
pl1e = map_domain_page(page_to_mfn(page));
pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));